The goal is simple: load an .OBJ model file → convert it into vertices/indices → upload to the GPU → draw it on a `<canvas>`.
We take the most conservative, easiest-to-follow route: OBJ (positions/normals/UVs) → stored into Vertex/Index Buffers → a Uniform buffer holding the camera matrices → Lambert lighting. The whole program plugs straight into your "Day16/17" project from earlier.
You'll need:

- An environment that can run WebGPU (Chrome/Edge, over https or localhost); see the quick check below.
- A project with `index.html` + `main.ts`/`main.js`, where the previous chapter's "initialize + clear screen" demo already runs.
- An `.obj` file (with normals `vn`; `vt` is optional), e.g. `assets/monkey.obj`.
- For a first attempt, pick a low-poly model (a few thousand faces) so you don't get stuck on performance or memory right away.
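If you want to verify the environment before writing any rendering code, here is a minimal check (a sketch; it assumes a module script so top-level `await` works):

```ts
// Feature-detect WebGPU: navigator.gpu only exists in supporting browsers
// on secure contexts (https or localhost).
if (!('gpu' in navigator)) {
  throw new Error('WebGPU unavailable: use Chrome/Edge over https or localhost');
}
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) throw new Error('No suitable GPU adapter found');
console.log('WebGPU adapter OK');
```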
```
[fetch + parse .OBJ] → [dedupe vertices (pos/normal/UV) + build indices]
        ↓
[create VertexBuffer / IndexBuffer]
        ↓
[create UniformBuffer (MVP / normal matrix / light)]
        ↓
[create RenderPipeline (VS/FS + vertex layout + depth)]
        ↓
Every frame:
  update uniforms (camera / rotation)
  beginRenderPass → setPipeline/BindGroup/Vertex/Index → drawIndexed → end
  submit → present
```
Drawing 3D almost always involves these three attributes:

- position: `vec3`
- normal: `vec3` (for lighting)
- uv: `vec2` (no texture sampling today; we can add it later)

Per-vertex memory layout (interleaved): `[pos.x, pos.y, pos.z, nor.x, nor.y, nor.z, uv.u, uv.v]` → 32 bytes per vertex.
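Spelled out as constants (the names here are just illustrative, not from the original code), the layout looks like this; the `vertexBufferLayout` later uses exactly these numbers (`arrayStride: 32`, attribute offsets 0/12/24):

```ts
// Interleaved vertex: 8 floats = 32 bytes.
const FLOATS_PER_VERTEX = 8;                  // 3 pos + 3 normal + 2 uv
const VERTEX_STRIDE = FLOATS_PER_VERTEX * 4;  // 32 bytes per vertex
const OFFSET_POS = 0;                         // bytes 0..11
const OFFSET_NORMAL = 3 * 4;                  // bytes 12..23
const OFFSET_UV = 6 * 4;                      // bytes 24..31
```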
The goal: turn the `.obj` face records (`f`) into a unique vertex array plus an index array (deduplicated), so we can draw efficiently with `drawIndexed`.
```ts
type MeshCPU = { vertices: Float32Array; indices: Uint32Array };

function parseOBJ(objText: string): MeshCPU {
  const P: number[] = [], T: number[] = [], N: number[] = []; // raw positions / UVs / normals
  const uniq = new Map<string, number>(); // "v/vt/vn" key → vertex index (dedup)
  const verts: number[] = [];
  const idx: number[] = [];
  const lines = objText.split(/\r?\n/);
  for (const ln of lines) {
    const s = ln.trim();
    if (s.startsWith('v ')) {
      const [, x, y, z] = s.split(/\s+/);
      P.push(+x, +y, +z);
    } else if (s.startsWith('vt ')) {
      const [, u, v] = s.split(/\s+/);
      T.push(+u, +v);
    } else if (s.startsWith('vn ')) {
      const [, x, y, z] = s.split(/\s+/);
      N.push(+x, +y, +z);
    } else if (s.startsWith('f ')) {
      const parts = s.substring(2).trim().split(/\s+/);
      const tri = (a: string, b: string, c: string) => {
        for (const p of [a, b, c]) {
          const key = p;
          let id = uniq.get(key);
          if (id === undefined) {
            const [viS, vtiS, vniS] = key.split('/');
            const vi = (parseInt(viS) - 1) * 3;
            const ti = vtiS ? (parseInt(vtiS) - 1) * 2 : -2;
            const ni = vniS ? (parseInt(vniS) - 1) * 3 : -3;
            const px = P[vi], py = P[vi + 1], pz = P[vi + 2];
            const nx = (ni >= 0 ? N[ni] : 0), ny = (ni >= 0 ? N[ni + 1] : 0), nz = (ni >= 0 ? N[ni + 2] : 1);
            const tu = (ti >= 0 ? T[ti] : 0), tv = (ti >= 0 ? T[ti + 1] : 0);
            id = verts.length / 8;
            verts.push(px, py, pz, nx, ny, nz, tu, 1 - tv); // flip V, by convention
            uniq.set(key, id);
          }
          idx.push(id);
        }
      };
      // Faces may be triangles, quads, or n-gons; fan-triangulate (assumes convex faces).
      for (let i = 1; i + 1 < parts.length; i++) tri(parts[0], parts[i], parts[i + 1]);
    }
  }
  return { vertices: new Float32Array(verts), indices: new Uint32Array(idx) };
}
```
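A quick way to sanity-check the parser before wiring up any GPU code (the inline triangle here is made-up test data):

```ts
// A one-triangle OBJ: 3 positions, 1 normal, one face using v//vn indices.
const demo = parseOBJ([
  'v 0 0 0', 'v 1 0 0', 'v 0 1 0',
  'vn 0 0 1',
  'f 1//1 2//1 3//1',
].join('\n'));
console.log(demo.vertices.length / 8, 'vertices;', demo.indices.length, 'indices'); // → 3 vertices; 3 indices
```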
This parser only supports the common `v`/`vt`/`vn` records, which is enough to get started. If you want glTF later, swap in an off-the-shelf loader or write a binary parser yourself.
Lighting is the simplest Lambert term (`max(dot(N, L), 0)`) plus a little ambient so the model never goes completely black.

shader.wgsl:

```wgsl
struct Uniforms {
  mvp : mat4x4<f32>,
  normalMat : mat4x4<f32>,
  lightDir : vec4<f32> // xyz is used; w is padding
}
@group(0) @binding(0) var<uniform> U : Uniforms;

struct VSOut {
  @builtin(position) pos : vec4<f32>,
  @location(0) normalWS : vec3<f32>
}

@vertex
fn vs_main(@location(0) inPos: vec3<f32>,
           @location(1) inNor: vec3<f32>,
           @location(2) inUV : vec2<f32>) -> VSOut {
  var o: VSOut;
  o.pos = U.mvp * vec4<f32>(inPos, 1.0);
  // Transform the normal to world space with normalMat (w = 0: unaffected by translation).
  o.normalWS = normalize((U.normalMat * vec4<f32>(inNor, 0.0)).xyz);
  return o;
}

@fragment
fn fs_main(@location(0) N: vec3<f32>) -> @location(0) vec4<f32> {
  // lightDir stores the direction the light travels; negate to get the to-light vector.
  let L = normalize(-U.lightDir.xyz);
  let lambert = max(dot(N, L), 0.0);
  let ambient = 0.08;
  let col = vec3<f32>(0.75, 0.75, 0.80) * (ambient + lambert);
  return vec4<f32>(col, 1.0);
}
```
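For reference, this uniform struct maps one-to-one onto the byte offsets the JavaScript side writes later (the constant names here are just illustrative):

```ts
// Byte offsets of the Uniforms struct above (mat4x4<f32> = 64 bytes, vec4<f32> = 16):
const OFF_MVP = 0;        // U.mvp       → bytes 0..63
const OFF_NORMAL = 64;    // U.normalMat → bytes 64..127
const OFF_LIGHT = 128;    // U.lightDir  → bytes 128..143
const UBO_BYTE_SIZE = 144;
```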
To keep things self-contained, the program below includes: minimal matrix helpers, a depth texture, resize handling, and a per-frame rotation. Change the model path and it should run.
index.html:

```html
<!doctype html>
<html lang="zh-Hant">
<head>
  <meta charset="utf-8"/>
  <meta name="viewport" content="width=device-width,initial-scale=1"/>
  <title>WebGPU Model</title>
  <style>html,body{height:100%;margin:0;background:#0b1220}canvas{width:100vw;height:100vh;display:block}</style>
</head>
<body>
  <canvas id="gfx"></canvas>
  <script type="module" src="./main.js"></script>
</body>
</html>
```
main.js / main.ts (paste as-is; if you use TS, add the types yourself):

```js
// ---------- WebGPU init ----------
async function getDevice() {
  if (!('gpu' in navigator)) throw new Error('WebGPU not available (needs https/localhost)');
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) throw new Error('No adapter');
  return await adapter.requestDevice();
}
// ---------- Matrix/vector helpers (just enough) ----------
const m4 = {
  ident: () => new Float32Array([1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]),
  // Column-major a·b (all matrices below are stored column-major).
  mul(a, b) {
    const o = new Float32Array(16);
    for (let c = 0; c < 4; c++) for (let r = 0; r < 4; r++) {
      o[c*4+r] = a[r]*b[c*4] + a[4+r]*b[c*4+1] + a[8+r]*b[c*4+2] + a[12+r]*b[c*4+3];
    }
    return o;
  },
  // WebGPU clip space puts z in [0,1] (unlike GL's [-1,1]).
  perspective(fovy, aspect, near, far) {
    const f = 1/Math.tan(fovy/2), nf = 1/(near-far);
    return new Float32Array([f/aspect,0,0,0, 0,f,0,0, 0,0,far*nf,-1, 0,0,far*near*nf,0]);
  },
  lookAt(eye, center, up) {
    // z = normalize(eye - center), x = normalize(up × z), y = z × x
    const zx=eye[0]-center[0], zy=eye[1]-center[1], zz=eye[2]-center[2];
    const zl=1/Math.hypot(zx,zy,zz); const zxN=zx*zl, zyN=zy*zl, zzN=zz*zl;
    let xx=up[1]*zzN-up[2]*zyN, xy=up[2]*zxN-up[0]*zzN, xz=up[0]*zyN-up[1]*zxN;
    const xl=1/Math.hypot(xx,xy,xz); xx*=xl; xy*=xl; xz*=xl;
    const yx=zyN*xz-zzN*xy, yy=zzN*xx-zxN*xz, yz=zxN*xy-zyN*xx;
    return new Float32Array([xx,yx,zxN,0, xy,yy,zyN,0, xz,yz,zzN,0,
      -(xx*eye[0]+xy*eye[1]+xz*eye[2]),
      -(yx*eye[0]+yy*eye[1]+yz*eye[2]),
      -(zxN*eye[0]+zyN*eye[1]+zzN*eye[2]), 1]);
  },
  rotateY(rad) {
    const c=Math.cos(rad), s=Math.sin(rad);
    return new Float32Array([ c,0,-s,0, 0,1,0,0, s,0,c,0, 0,0,0,1 ]);
  },
  inverseTranspose3x3Into4x4(model) {
    // Simplified: correct for rotation / uniform scale only (enough for this demo).
    // A strict version would compute the 3x3 inverse-transpose.
    return model; // with rotation/translation only, the model matrix also transforms normals correctly
  }
};
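
// Quick sanity check for the helpers (illustrative, safe to delete):
// rotating the point (1,0,0) by 90° around Y should give roughly (0,0,-1).
// With column-major storage, out_i = Σ_k m[k*4+i] * v_k.
{
  const R = m4.rotateY(Math.PI / 2);
  const p = [0, 1, 2].map(i => R[i] * 1 + R[12 + i] * 1);
  console.log('rotateY check, expect ≈ [0,0,-1]:', p);
}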
// ---------- Main flow ----------
const canvas = document.getElementById('gfx');
const context = canvas.getContext('webgpu');
const device = await getDevice();
const format = navigator.gpu.getPreferredCanvasFormat();

let depthTex = null, depthView = null;
function resize() {
  const dpr = Math.min(2, window.devicePixelRatio || 1);
  canvas.width = Math.floor(canvas.clientWidth * dpr);
  canvas.height = Math.floor(canvas.clientHeight * dpr);
  context.configure({ device, format, alphaMode: 'opaque' });
  // Recreate the depth texture whenever the canvas size changes.
  if (depthTex) depthTex.destroy();
  depthTex = device.createTexture({
    size: [canvas.width, canvas.height],
    format: 'depth24plus',
    usage: GPUTextureUsage.RENDER_ATTACHMENT
  });
  depthView = depthTex.createView();
}
window.addEventListener('resize', resize); resize();

// Load the model (swap in your own .obj path); parseOBJ is from the section above.
const objText = await (await fetch('assets/monkey.obj')).text();
const meshCPU = parseOBJ(objText);

// Create GPU buffers (vertex / index / uniform)
const vbuf = device.createBuffer({
  size: meshCPU.vertices.byteLength,
  usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});
device.queue.writeBuffer(vbuf, 0, meshCPU.vertices);

const ibuf = device.createBuffer({
  size: meshCPU.indices.byteLength,
  usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
});
device.queue.writeBuffer(ibuf, 0, meshCPU.indices);
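
// Aside (a sketch): meshes with fewer than 65,536 vertices could use uint16
// indices instead (pass 'uint16' to setIndexBuffer, and pad the upload to a
// multiple of 4 bytes). We stay with uint32 for simplicity.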
const uboSize = 16*4 * 2 + 16; // mvp (64) + normalMat (64) + lightDir (vec4 = 16)
const ubo = device.createBuffer({
  size: uboSize,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});
// Shader
const shaderCode = await (await fetch('shader.wgsl')).text();
const shaderModule = device.createShaderModule({ code: shaderCode });

// Bind group (uniforms)
const bindGroupLayout = device.createBindGroupLayout({
  entries: [{ binding: 0, visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
              buffer: { type: 'uniform' } }]
});
const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] });
const bindGroup = device.createBindGroup({
  layout: bindGroupLayout,
  entries: [{ binding: 0, resource: { buffer: ubo } }]
});
// Vertex input layout (matches the interleaved 32-byte vertex above)
const vertexBufferLayout = {
  arrayStride: 8*4,
  attributes: [
    { shaderLocation: 0, offset: 0,   format: 'float32x3' }, // pos
    { shaderLocation: 1, offset: 3*4, format: 'float32x3' }, // normal
    { shaderLocation: 2, offset: 6*4, format: 'float32x2' }, // uv
  ]
};
// RenderPipeline (note depthStencil: the render pass below uses a depth attachment)
const pipeline = device.createRenderPipeline({
  layout: pipelineLayout,
  vertex: { module: shaderModule, entryPoint: 'vs_main', buffers: [vertexBufferLayout] },
  primitive: { topology: 'triangle-list', cullMode: 'back', frontFace: 'ccw' },
  depthStencil: { format: 'depth24plus', depthWriteEnabled: true, depthCompare: 'less' },
  fragment: { module: shaderModule, entryPoint: 'fs_main', targets: [{ format }] }
});
// Update the UBO every frame (camera + model rotation)
function updateUniforms(tSec) {
  const eye = [0, 0.7, 2.2], center = [0, 0.2, 0], up = [0, 1, 0];
  // WebGPU NDC is y-up, so no Vulkan-style Y flip is needed here.
  const proj = m4.perspective(60*Math.PI/180, canvas.width/canvas.height, 0.1, 100);
  const view = m4.lookAt(eye, center, up);
  const model = m4.rotateY(tSec*0.6);
  const mvp = m4.mul(proj, m4.mul(view, model));
  const normalMat = m4.inverseTranspose3x3Into4x4(model);
  const lightDir = new Float32Array([ -0.4, -1.0, -0.35, 0 ]);
  // Layout: mvp (bytes 0..63) + normalMat (64..127) + lightDir (128..143)
  device.queue.writeBuffer(ubo, 0, mvp);
  device.queue.writeBuffer(ubo, 64, normalMat);
  device.queue.writeBuffer(ubo, 128, lightDir);
}
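
// Optional micro-optimization (a sketch, not required): pack all three
// uniforms into one scratch array and upload with a single writeBuffer call.
const uboScratch = new Float32Array(36); // 16 + 16 + 4 floats = 144 bytes
function updateUniformsPacked(mvp, normalMat, lightDir) {
  uboScratch.set(mvp, 0);
  uboScratch.set(normalMat, 16);
  uboScratch.set(lightDir, 32);
  device.queue.writeBuffer(ubo, 0, uboScratch);
}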
// Draw one frame
function frame(ts) {
  const t = ts*0.001;
  updateUniforms(t);
  const encoder = device.createCommandEncoder();
  const colorView = context.getCurrentTexture().createView();
  const pass = encoder.beginRenderPass({
    colorAttachments: [{
      view: colorView,
      clearValue: { r: 0.06, g: 0.10, b: 0.16, a: 1 },
      loadOp: 'clear', storeOp: 'store'
    }],
    depthStencilAttachment: {
      view: depthView,
      depthClearValue: 1.0,
      depthLoadOp: 'clear', depthStoreOp: 'store'
    }
  });
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.setVertexBuffer(0, vbuf);
  pass.setIndexBuffer(ibuf, 'uint32');
  pass.drawIndexed(meshCPU.indices.length);
  pass.end();
  device.queue.submit([encoder.finish()]);
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
```
Swap `fetch('assets/monkey.obj')` for your own path, and save the shader above as `shader.wgsl`.
Alignment matters for uniform layouts: here the uniform block is two `mat4x4` plus one `vec4`, all 16-byte aligned, so there are no padding pitfalls to step on.
Result: the backbone of getting a model on screen is (parse) → vertex/index buffers → uniforms (camera/light) → pipeline (vertex layout/depth) → render pass draw.
You've now walked the most important road, from file on disk to pixels on screen; add textures and materials next and this grows into a small WebGPU 3D viewer!